set_pte(pte, pfn_pte(maddr >> PAGE_SHIFT,
__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+ mb ();
//XXX CONFIG_XEN_IA64_DOM0_VP
// TODO racy
set_gpfn_from_mfn(page_to_mfn(p), mpaddr >> PAGE_SHIFT);
set_pte(pte,
pfn_pte(physaddr >> PAGE_SHIFT,
__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+ mb ();
} else
printk("%s: mpaddr %lx already mapped!\n", __func__, mpaddr);
}
struct mm_struct *mm = &d->arch.mm;
pte_t* pte;
pte_t old_pte;
+ pte_t npte;
pte = lookup_alloc_domain_pte(d, mpaddr);
// update pte
- old_pte = ptep_get_and_clear(mm, mpaddr, pte);
- set_pte(pte, pfn_pte(mfn,
- __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+ npte = pfn_pte(mfn, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX));
+ old_pte = ptep_xchg(mm, mpaddr, pte, npte);
if (!pte_none(old_pte)) {
unsigned long old_mfn;
struct page_info* old_page;
#endif
}
+/*
+ * ptep_xchg - install a new PTE value and return the previous one.
+ *
+ * @mm:   owning mm (unused here; kept for pgtable-API signature parity)
+ * @addr: virtual address being mapped (unused here; API parity)
+ * @ptep: pointer to the PTE slot to update
+ * @npte: new PTE value to store
+ *
+ * On CONFIG_SMP the swap is done with xchg() so the read-old/write-new
+ * pair is a single atomic operation (no window where another CPU can
+ * observe a cleared or half-updated entry).  The cast to (long *) relies
+ * on pte_val()/pte_t being a single word on this architecture --
+ * NOTE(review): confirm sizeof(pte_t) == sizeof(long) on ia64.
+ *
+ * On UP there is no cross-CPU racer, so a plain read followed by
+ * set_pte() is sufficient; this path is NOT atomic with respect to
+ * interrupt context.
+ *
+ * Returns the PTE value that was previously in *ptep.
+ */
+static inline pte_t
+ptep_xchg(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t npte)
+{
+#ifdef CONFIG_SMP
+ return __pte(xchg((long *) ptep, pte_val(npte)));
+#else
+ pte_t pte = *ptep;
+ set_pte (ptep, npte);
+ return pte;
+#endif
+}
+
#ifndef XEN
static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)